How to deploy a Hadoop workflow environment with a shell script on CentOS 7

  1. Download the tarball

    You can download the tarball from here.

    Hadoop and Hive come from Apache; Oozie and Sqoop come from Hortonworks.
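
    If you prefer to assemble the bundle yourself, the Apache components can be pulled from the Apache archive. A minimal sketch; the version numbers here are illustrative assumptions, not necessarily what this bundle ships, so substitute the releases you actually need:

    # versions below are illustrative assumptions
    wget https://archive.apache.org/dist/hadoop/common/hadoop-2.7.7/hadoop-2.7.7.tar.gz
    wget https://archive.apache.org/dist/hive/hive-2.3.9/apache-hive-2.3.9-bin.tar.gz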

    # ls
    hadoopWorkflow.sh  hadoop.tar.gz  ips
    # cat ips
    192.168.1.1
    192.168.1.2
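
    The script refuses to run unless MySQL is already up, so the metastore account has to exist first. A minimal sketch of creating the user that the script's check message refers to, run on the MySQL master (the global grant is an assumption; scope it down for anything beyond a lab setup):

    # create the account the script's MySQL check expects;
    # the *.* grant is an assumption, tighten it for production
    mysql -uroot -p -e "CREATE USER 'chenshi'@'%' IDENTIFIED BY 'chenshi.net'; GRANT ALL PRIVILEGES ON *.* TO 'chenshi'@'%'; FLUSH PRIVILEGES;"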
    
  2. Script content

    #!/bin/bash
    # the script expects a pre-built MySQL 5.7 master-slave service with
    # user=chenshi (password=chenshi.net) available for the Hive metastore
    if [ -z "$(pidof mysqld)" ]; then
        echo "please manually install the MySQL 5.7 master-slave service and create user=chenshi (password=chenshi.net)"
        exit 1
    fi
    # the ips file must list one server IP address per line
    if [ ! -s /root/ips ]; then
        echo "please write the IP addresses of the servers into the ips file"
        exit 1
    fi
    # install expect and pssh, pulling in EPEL first unless a local repo exists
    if [ -e /etc/yum.repos.d/local.repo ]; then
        yum install expect pssh -y
    else
        yum install epel-release -y && yum install expect pssh -y
    fi
    # generate an SSH key pair non-interactively if none exists yet
    if [ ! -e /root/.ssh/id_rsa ]; then
        /usr/bin/expect << EOF
    spawn ssh-keygen -t rsa -b 2048
    expect ":"
    send "\n"
    expect ":"
    send "\n"
    expect ":"
    send "\n"
    expect eof
    EOF
    fi
    # push the public key to every server listed in ips
    if [ ! -e /root/.ssh/authorized_keys ]; then
        echo "please input the passwords of the servers:"
        for i in $(cat ips); do
            ssh-copy-id -o StrictHostKeyChecking=no $i
        done
    fi
    # rename each node to hadoopN.chenshi.net, and build the slaves file and
    # the /etc/hosts entries for the whole cluster
    change_hostname(){
        node_count=$(cat ips | wc -l)
        for i in $(seq 1 $node_count); do
            pssh -H $(sed -n "$i"p ips) -l root hostnamectl set-hostname hadoop$i.chenshi.net
            echo hadoop$i.chenshi.net >> slaves
            echo $(sed -n "$i"p ips) hadoop$i.chenshi.net >> tmp
        done
        pscp.pssh -h ips -l root slaves tmp /tmp/
        pssh -h ips -l root "cat /tmp/tmp >> /etc/hosts;rm -f /tmp/tmp"
        rm -f slaves tmp
    }
    if [ $(cat /etc/hosts | wc -l) -le 2 ]; then
        change_hostname
    else
        echo "please check the /etc/hosts file"
        exit 1
    fi
    # call immediately after an initialization command; reports failure for
    # the component named in $1 based on that command's exit status
    check_data_init(){
        if [ $? -ne 0 ]; then
            echo "$1 data initialization failed"
            exit 1
        fi
    }
    # distribute and unpack the bundle, install OpenJDK 8, and extend PATH on
    # every node ($PATH and $HOME are escaped so they expand remotely)
    pscp.pssh -h ips -l root hadoop.tar.gz /usr/local/
    pssh -h ips -l root -t 300 "tar zxf /usr/local/hadoop.tar.gz -C /usr/local/;yes|mv /tmp/slaves /usr/local/hadoop/etc/hadoop/slaves;mkdir -p /data/{tmp,nn,dn};yum install java-1.8.0-openjdk java-1.8.0-openjdk-devel -y;sed -i '12a export JAVA_HOME=/usr/lib/jvm/jre-1.8.0-openjdk/' /root/.bash_profile;sed -i '10c PATH=\$PATH:\$HOME/bin:/usr/local/hadoop/bin:/usr/local/hadoop/sbin:/usr/local/hive/bin:/usr/local/oozie/bin:/usr/local/sqoop/bin' /root/.bash_profile;sed -i 's/^.*StrictHostKeyChecking.*$/StrictHostKeyChecking no/g' /etc/ssh/ssh_config"
    # format HDFS and start the cluster from the master node
    . /root/.bash_profile
    hdfs namenode -format
    check_data_init hdfs
    start-dfs.sh && start-yarn.sh
    # initialize the Hive metastore schema in MySQL
    schematool -dbType mysql -initSchema
    check_data_init hive
    # install the Oozie sharelib into HDFS and create the Oozie database
    hdfs dfs -mkdir -p /user/root/share/lib/
    cd /usr/local/oozie/
    oozie-setup.sh sharelib create -fs hdfs://hadoop1.chenshi.net:8020 -locallib oozie-sharelib.tar.gz
    ooziedb.sh create -sqlfile oozie.sql -run
    check_data_init oozie
    cd -
    # start Oozie on every node and verify that the server reports NORMAL
    pssh -h ips -l root ". /root/.bash_profile;oozied.sh start"
    sleep 30
    if [ "$(oozie admin -oozie http://hadoop1.chenshi.net:11000/oozie -status)" != "System mode: NORMAL" ]; then
        echo "please check the oozie service"
    fi
    echo "done, the oozie job.properties file is in /usr/local/oozie/conf/"
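
    After the script completes, a quick smoke test is to re-check the Oozie status and submit a job with the generated properties file. A sketch, assuming a workflow application has already been uploaded to HDFS and is referenced by that job.properties:

    # verify the server is in NORMAL mode, then submit a workflow
    oozie admin -oozie http://hadoop1.chenshi.net:11000/oozie -status
    oozie job -oozie http://hadoop1.chenshi.net:11000/oozie -config /usr/local/oozie/conf/job.properties -run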
    

    References:

    https://docs.cloudera.com/HDPDocuments/HDP2/HDP-2.6.2/bk_command-line-installation/content/ch_getting_ready_chapter.html